/* XXX evaluate all isyncs in segment code */
-static void flush_slb(struct vcpu *v)
+void flush_segments(void)
{
- struct slb_entry *slb0 = &v->arch.slb_entries[0];
+ struct slb_entry slb0;
+ ulong zero = 0;
- slbia();
+ __asm__ __volatile__(
+ "slbmfev %0,%2\n"
+ "slbmfee %1,%2\n"
+ :"=&r"(slb0.slb_vsid), "=&r"(slb0.slb_esid)
+ :"r"(zero)
+ :"memory");
/* we manually have to invalidate SLB[0] since slbia doesn't. */
- /* XXX name magic constants! */
- if (slb0->slb_esid & (1 << (63 - 36))) {
+ if (slb0.slb_esid & SLB_ESID_VALID) {
ulong rb;
ulong class;
- class = (slb0->slb_vsid >> (63 - 56)) & 1ULL;
- rb = slb0->slb_esid & (~0ULL << (63 - 35));
- rb |= class << (63 - 36);
+ /* the Class bit is carried in the VSID word; slbie expects it in RB */
+ class = !!(slb0.slb_vsid & SLB_ESID_CLASS);
+ rb = slb0.slb_esid & SLB_ESID_MASK;
+ rb |= class << SLBIE_CLASS_LOG;
slbie(rb);
}
+ slbia();
}
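The named constants used above can be read straight off the magic numbers in the removed lines. A minimal sketch of the assumed definitions and of the slbie/slbia wrappers follows; the header actually touched by this patch may name or group them differently.

/* Assumed values, reconstructed from the removed IBM-numbered bit expressions. */
#define SLB_ESID_VALID  (1UL << (63 - 36))    /* V bit in the ESID word              */
#define SLB_ESID_MASK   (~0UL << (63 - 35))   /* ESID field of the slbie RB operand  */
#define SLB_ESID_CLASS  (1UL << (63 - 56))    /* Class bit, carried in the VSID word */
#define SLBIE_CLASS_LOG (63 - 36)             /* bit position slbie expects Class at */

/* slbie/slbia are presumably thin inline-asm wrappers along these lines. */
static inline void slbie(ulong rb)
{
    __asm__ __volatile__("slbie %0" : : "r" (rb) : "memory");
}

static inline void slbia(void)
{
    __asm__ __volatile__("slbia" : : : "memory");
}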
void save_segments(struct vcpu *v)
#endif
}
- flush_slb(v);
+ flush_segments();
}
void load_segments(struct vcpu *v)
/* FIXME: should we bother to restore invalid entries */
/* stuff in the index here */
- esid |= i & ((0x1UL << (63 - 52 + 1)) - 1);
+ esid &= ~SLBMTE_ENTRY_MASK;
+ esid |= i;
__asm__ __volatile__(
"isync\n"
#endif
}
}
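load_segments folds the entry index into the low bits of the ESID word before issuing slbmte; the removed expression (0x1UL << (63 - 52 + 1)) - 1 shows that field to be the low 12 bits of the RB operand, so SLBMTE_ENTRY_MASK is presumably just that. A minimal sketch, with the hypothetical slbmte_entry() helper standing in for the elided asm:

/* Assumed value: the slbmte entry-index field is the low 12 bits of RB. */
#define SLBMTE_ENTRY_MASK ((1UL << 12) - 1)   /* 0xfff */

/* Hypothetical helper mirroring what the elided loop body presumably does:
 * slbmte with the VSID word in RS and the patched ESID word (ESID | V | index)
 * in RB, bracketed by isync. */
static inline void slbmte_entry(ulong vsid, ulong esid)
{
    __asm__ __volatile__(
        "isync\n"
        "slbmte %0,%1\n"
        "isync\n"
        : : "r" (vsid), "r" (esid) : "memory");
}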
+
+void dump_segments(int valid)
+{
+ int i;
+
+ printk("Dump %s SLB entries:\n", valid ? "VALID" : "ALL");
+
+ /* read back and print every SLB entry */
+ for (i = 0; i < NUM_SLB_ENTRIES; i++) {
+ ulong vsid;
+ ulong esid;
+
+ __asm__ __volatile__(
+ "slbmfev %0,%2\n"
+ "slbmfee %1,%2\n"
+ :"=&r"(vsid), "=&r"(esid)
+ :"r"(i)
+ :"memory");
+
+ if (valid && !(esid & SLB_ESID_VALID))
+ continue;
+ printk("S%02d: 0x%016lx 0x%016lx\n", i, vsid, esid);
+ }
+}
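The segment code above relies on a couple of definitions that live in headers not shown in this hunk; a minimal sketch of what they are assumed to look like (the 64-entry count matches the PPC970's SLB size):

/* Assumed supporting definitions; the real ones live in the arch headers. */
#define NUM_SLB_ENTRIES 64          /* the 970 implements 64 SLB entries */

struct slb_entry {
    ulong slb_vsid;                 /* word read back by slbmfev */
    ulong slb_esid;                 /* word read back by slbmfee */
};

A typical debug call is dump_segments(1), which prints only the entries whose valid bit is set; dump_segments(0) dumps all slots.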
extern int cpu_io_mfn(ulong mfn);
extern void save_cpu_sprs(struct vcpu *);
extern void load_cpu_sprs(struct vcpu *);
+extern void flush_segments(void);
+extern void dump_segments(int valid);
/* XXX this could also land us in GDB */
#define dump_execution_state() BUG()